From 02004df07a497ea5a45f17ac72b22f307d79a669 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Mon, 7 Jul 2014 16:29:15 +0100 Subject: [PATCH] xen/arm: Don't save/restore context for idle VCPU When an idle VCPU is running, Xen will never exit the hypervisor mode. Furthermore, some part of the VCPU/domain initialization is already skipped for them to avoid memory consumption. Currently, each save/restore function checks by itself whether the vcpu is an idle one or not. We can safely skip the context switch in one place and gain a bit of time when we {,un}schedule an idle VCPU. This is because the saving part will take care of disabling anything related to the guest (such as GICv). Also replace every check of an idle VCPU in save/restore functions by an ASSERT, to know if someone is calling them with an idle VCPU as argument. Signed-off-by: Julien Grall Acked-by: Ian Campbell --- xen/arch/arm/domain.c | 14 ++++++++++++++ xen/arch/arm/gic.c | 5 ++--- xen/arch/arm/vtimer.c | 6 ++---- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index bb5c810756..0c16f23e4d 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -60,6 +60,12 @@ void idle_loop(void) static void ctxt_switch_from(struct vcpu *p) { + /* When the idle VCPU is running, Xen will always stay in hypervisor + * mode. Therefore we don't need to save the context of an idle VCPU. + */ + if ( is_idle_vcpu(p) ) + goto end_context; + p2m_save_state(p); /* CP 15 */ @@ -132,11 +138,19 @@ static void ctxt_switch_from(struct vcpu *p) gic_save_state(p); isb(); + +end_context: context_saved(p); } static void ctxt_switch_to(struct vcpu *n) { + /* When the idle VCPU is running, Xen will always stay in hypervisor + * mode. Therefore we don't need to restore the context of an idle VCPU. 
+ */ + if ( is_idle_vcpu(n) ) + return; + p2m_restore_state(n); WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2); diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c index e1e27b359d..83b004c4ea 100644 --- a/xen/arch/arm/gic.c +++ b/xen/arch/arm/gic.c @@ -69,6 +69,7 @@ unsigned int gic_number_lines(void) void gic_save_state(struct vcpu *v) { ASSERT(!local_irq_is_enabled()); + ASSERT(!is_idle_vcpu(v)); /* No need for spinlocks here because interrupts are disabled around * this call and it only accesses struct vcpu fields that cannot be @@ -82,9 +83,7 @@ void gic_save_state(struct vcpu *v) void gic_restore_state(struct vcpu *v) { ASSERT(!local_irq_is_enabled()); - - if ( is_idle_vcpu(v) ) - return; + ASSERT(!is_idle_vcpu(v)); this_cpu(lr_mask) = v->arch.lr_mask; gic_hw_ops->restore_state(v); diff --git a/xen/arch/arm/vtimer.c b/xen/arch/arm/vtimer.c index 690657b9c7..2e95cebac1 100644 --- a/xen/arch/arm/vtimer.c +++ b/xen/arch/arm/vtimer.c @@ -94,8 +94,7 @@ void vcpu_timer_destroy(struct vcpu *v) int virt_timer_save(struct vcpu *v) { - if ( is_idle_domain(v->domain) ) - return 0; + ASSERT(!is_idle_vcpu(v)); v->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0); WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0); @@ -111,8 +110,7 @@ int virt_timer_save(struct vcpu *v) int virt_timer_restore(struct vcpu *v) { - if ( is_idle_domain(v->domain) ) - return 0; + ASSERT(!is_idle_vcpu(v)); stop_timer(&v->arch.virt_timer.timer); migrate_timer(&v->arch.virt_timer.timer, v->processor); -- 2.30.2